pmd = pmd_offset(pud, (vstart + (i*PAGE_SIZE)));
pte = pte_offset_kernel(pmd, (vstart + (i*PAGE_SIZE)));
mfn = pte_mfn(*pte);
- HYPERVISOR_update_va_mapping(
- vstart + (i*PAGE_SIZE), __pte_ma(0), 0);
+ BUG_ON(HYPERVISOR_update_va_mapping(
+ vstart + (i*PAGE_SIZE), __pte_ma(0), 0));
phys_to_machine_mapping[(__pa(vstart)>>PAGE_SHIFT)+i] =
INVALID_P2M_ENTRY;
BUG_ON(HYPERVISOR_dom_mem_op(
/* 3. Map the new extent in place of old pages. */
for (i = 0; i < (1<<order); i++) {
- HYPERVISOR_update_va_mapping(
+ BUG_ON(HYPERVISOR_update_va_mapping(
vstart + (i*PAGE_SIZE),
- __pte_ma(((mfn+i)<<PAGE_SHIFT)|__PAGE_KERNEL), 0);
+ __pte_ma(((mfn+i)<<PAGE_SHIFT)|__PAGE_KERNEL), 0));
xen_machphys_update(mfn+i, (__pa(vstart)>>PAGE_SHIFT)+i);
phys_to_machine_mapping[(__pa(vstart)>>PAGE_SHIFT)+i] = mfn+i;
}
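The loop above is step 3 of a three-step remap: the first hunk is the tail of step 1 (tearing down the old mappings), and the truncated HYPERVISOR_dom_mem_op() call is step 2. A hedged summary of the flow, written as a comment rather than as authoritative code:

/*
 * Sketch of the sequence (names as in the hunks above; error handling
 * and the exact MEMOP_* arguments are elided):
 *
 *  1. For each page in [vstart, vstart + (1<<order)*PAGE_SIZE): clear
 *     its PTE with HYPERVISOR_update_va_mapping(va, __pte_ma(0), 0)
 *     and mark its slot in phys_to_machine_mapping[] INVALID_P2M_ENTRY.
 *  2. Trade the old frames for a single machine extent of 2^order
 *     frames via HYPERVISOR_dom_mem_op().
 *  3. Map the new extent back in place, keeping the guest's P2M array
 *     and the hypervisor's M2P table (xen_machphys_update) in sync.
 *
 * A failed update_va_mapping at any point would leave the direct map
 * inconsistent with the P2M array, which is why every call is now
 * wrapped in BUG_ON() rather than silently ignored.
 */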
#ifdef CONFIG_X86_64
xen_l1_entry_update(pte, __pte(0));
#else
- HYPERVISOR_update_va_mapping(vstart + (i*PAGE_SIZE), __pte_ma(0), 0);
+ BUG_ON(HYPERVISOR_update_va_mapping(vstart + (i*PAGE_SIZE),
+ __pte_ma(0), 0));
#endif
phys_to_machine_mapping[(__pa(vstart)>>PAGE_SHIFT)+i] =
INVALID_P2M_ENTRY;
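Same teardown on the 64-bit build: CONFIG_X86_64 clears the PTE directly through xen_l1_entry_update(), which returns nothing to assert on, so only the i386 branch gains a BUG_ON(). Both branches end identically, with the page's P2M slot set to INVALID_P2M_ENTRY.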
unsigned long va = (unsigned long)__va(page_to_pfn(pte)<<PAGE_SHIFT);
if (!pte_write(*virt_to_ptep(va)))
- HYPERVISOR_update_va_mapping(
- va, pfn_pte(page_to_pfn(pte), PAGE_KERNEL), 0);
+ BUG_ON(HYPERVISOR_update_va_mapping(
+ va, pfn_pte(page_to_pfn(pte), PAGE_KERNEL), 0));
ClearPageForeign(pte);
set_page_count(pte, 1);
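This hunk is from the page-table page release path; pte here is a struct page *, not a pte_t *, so page_to_pfn(pte) recovers the frame behind it. A page that served as a (pinned) page table had its direct-map entry made read-only, and the hunk restores a writable mapping before ClearPageForeign() and set_page_count(pte, 1) return the page to a normal state for freeing. BUG_ON() is the right severity here: a frame left read-only in the direct map would fault on the next write through its __va() alias.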
if (!pte_write(*ptep)) {
xen_pgd_unpin(__pa(pgd));
- HYPERVISOR_update_va_mapping(
+ BUG_ON(HYPERVISOR_update_va_mapping(
(unsigned long)pgd,
pfn_pte(virt_to_phys(pgd)>>PAGE_SHIFT, PAGE_KERNEL),
- 0);
+ 0));
}
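Here the test runs the other way around: a read-only pgd mapping means the pgd is currently pinned, so it is unpinned first and only then remapped writable. The order is forced by Xen's semantics: a pinned page table must stay read-only in the guest, so the remap can only succeed after xen_pgd_unpin().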
/* in the PAE case user pgd entries are overwritten before usage */
if (PageHighMem(page))
return;
- HYPERVISOR_update_va_mapping(
+ BUG_ON(HYPERVISOR_update_va_mapping(
(unsigned long)__va(pfn << PAGE_SHIFT),
- pfn_pte(pfn, flags), 0);
+ pfn_pte(pfn, flags), 0));
}
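The PageHighMem() early return is because highmem pages have no permanent kernel mapping to fix up; only lowmem pages appear in the direct map, so only they need their __va() alias remapped with the new flags.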
static void mm_walk(struct mm_struct *mm, pgprot_t flags)
spin_lock(&mm->page_table_lock);
mm_walk(mm, PAGE_KERNEL_RO);
- HYPERVISOR_update_va_mapping(
+ BUG_ON(HYPERVISOR_update_va_mapping(
(unsigned long)mm->pgd,
pfn_pte(virt_to_phys(mm->pgd)>>PAGE_SHIFT, PAGE_KERNEL_RO),
- UVMF_TLB_FLUSH);
+ UVMF_TLB_FLUSH));
xen_pgd_pin(__pa(mm->pgd));
mm->context.pinned = 1;
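This is the pinning path (the function name is outside the excerpt, but mm->context.pinned = 1 marks it as the pin side). The ordering is the point of the hunk:

/* Hedged reading of the sequence above:
 *   mm_walk(mm, PAGE_KERNEL_RO);    write-protect every pagetable page
 *   remap the pgd read-only, with UVMF_TLB_FLUSH so no stale writable
 *       TLB entry survives into the pin
 *   xen_pgd_pin(__pa(mm->pgd));     Xen validates and pins the tree
 *
 * Xen refuses to pin page tables the guest can still write, so a failed
 * remap would doom the pin hypercall anyway -- hence BUG_ON().
 */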
spin_lock(&mm_unpinned_lock);
spin_lock(&mm->page_table_lock);
xen_pgd_unpin(__pa(mm->pgd));
- HYPERVISOR_update_va_mapping(
+ BUG_ON(HYPERVISOR_update_va_mapping(
(unsigned long)mm->pgd,
- pfn_pte(virt_to_phys(mm->pgd)>>PAGE_SHIFT, PAGE_KERNEL), 0);
+ pfn_pte(virt_to_phys(mm->pgd)>>PAGE_SHIFT, PAGE_KERNEL), 0));
mm_walk(mm, PAGE_KERNEL);
xen_tlb_flush();
mm->context.pinned = 0;
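The unpin side mirrors it in reverse: unpin first (after which Xen no longer enforces read-only), then remap the pgd writable, walk the tables back to PAGE_KERNEL, and flush. Note the remap passes 0 rather than UVMF_TLB_FLUSH here; the explicit xen_tlb_flush() after mm_walk() covers the whole sequence.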
/* Link back into the page tables if it's not a highmem page. */
if ( pfn < max_low_pfn )
{
- HYPERVISOR_update_va_mapping(
+ BUG_ON(HYPERVISOR_update_va_mapping(
(unsigned long)__va(pfn << PAGE_SHIFT),
__pte_ma((mfn_list[i] << PAGE_SHIFT) |
pgprot_val(PAGE_KERNEL)),
- 0);
+ 0));
}
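This hunk looks like the balloon driver's allocation side (mfn_list and the comment suggest as much): frames newly granted by the hypervisor are wired into the pseudo-physical space, and for lowmem pfns the direct-map PTE is rebuilt as well. Highmem pfns fall outside the direct map and are skipped, matching the PageHighMem() logic earlier in the patch.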
/* Finally, relinquish the memory back to the system allocator. */
{
v = phys_to_virt(pfn << PAGE_SHIFT);
scrub_pages(v, 1);
- HYPERVISOR_update_va_mapping(
- (unsigned long)v, __pte_ma(0), 0);
+ BUG_ON(HYPERVISOR_update_va_mapping(
+ (unsigned long)v, __pte_ma(0), 0));
}
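The release side is symmetric: scrub_pages() clears the page so its contents cannot leak to another domain once the frame is returned, and the direct-map PTE is torn down before the frame leaves the guest. Judging by the fragment below, CONFIG_XEN_SCRUB_PAGES makes the scrub step compile-time optional.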
#ifdef CONFIG_XEN_SCRUB_PAGES
else
"4" (flags)
: "memory" );
- if ( unlikely(ret < 0) )
- {
- printk(KERN_ALERT "Failed update VA mapping: %08lx, %08lx, %08lx\n",
- va, (new_val).pte_low, flags);
- BUG();
- }
-
return ret;
}
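This hunk is the one that motivates all the call-site churn above: the hypercall wrapper used to printk and BUG() on any failure itself, so callers could ignore the return value. With the check deleted, the wrapper hands back the raw hypercall status, and each caller that treats failure as fatal now says so explicitly. A minimal sketch of the resulting idiom, with a hypothetical helper name (the patch open-codes the pattern at every site rather than adding such a helper):

static inline void update_va_mapping_or_bug(unsigned long va, pte_t val,
					    unsigned long flags)
{
	/* hypothetical helper, not part of the patch */
	BUG_ON(HYPERVISOR_update_va_mapping(va, val, flags));
}

Callers that can tolerate a failed update are now free to branch on the return value instead of being killed inside the wrapper.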
do { \
if (__dirty) { \
if ( likely((__vma)->vm_mm == current->mm) ) { \
- HYPERVISOR_update_va_mapping((__address), (__entry), UVMF_INVLPG|UVMF_MULTI|(unsigned long)((__vma)->vm_mm->cpu_vm_mask.bits)); \
+			BUG_ON(HYPERVISOR_update_va_mapping((__address), (__entry), \
+				UVMF_INVLPG|UVMF_MULTI| \
+				(unsigned long)((__vma)->vm_mm->cpu_vm_mask.bits))); \
} else { \
xen_l1_entry_update((__ptep), (__entry)); \
flush_tlb_page((__vma), (__address)); \